#define sizeof_vcpu_shift 3
#ifdef CONFIG_SMP
-#define XEN_GET_VCPU_INFO(reg)
#define preempt_disable(reg) incl TI_preempt_count(reg)
#define preempt_enable(reg) decl TI_preempt_count(reg)
-#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp) ; \
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp) ; \
movl TI_cpu(%ebp),reg ; \
shl $sizeof_vcpu_shift,reg ; \
addl HYPERVISOR_shared_info,reg
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
-#define Ux00 0xff
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
- XEN_LOCKED_BLOCK_EVENTS(reg) ; \
- XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNBLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
- movb $0,evtchn_upcall_mask(reg) ; \
- XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp) ; \
- XEN_LOCK_VCPU_INFO_SMP(reg) ; \
- movb evtchn_upcall_mask(reg), tmp ; \
- movb tmp, off(%esp) ; \
- XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
#else
-#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_LOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
-#define Ux00 0x00
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg) XEN_LOCKED_BLOCK_EVENTS(reg)
-#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
- movb evtchn_upcall_mask(reg), tmp; \
- movb tmp, off(%esp)
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
#endif
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
#ifdef CONFIG_PREEMPT
#define resume_kernel restore_all
#endif
-#define SAVE_ALL_NO_EVENTMASK \
+#define SAVE_ALL \
cld; \
pushl %es; \
pushl %ds; \
pushl %ebx; \
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
- movl %edx, %es;
-
-#define SAVE_ALL \
- SAVE_ALL_NO_EVENTMASK; \
- XEN_GET_VCPU_INFO(%esi); \
- XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
+ movl %edx, %es
#define RESTORE_INT_REGS \
popl %ebx; \
call schedule_tail
GET_THREAD_INFO(%ebp)
popl %eax
- XEN_GET_VCPU_INFO(%esi)
jmp syscall_exit
/*
testl $(VM_MASK | 2), %eax
jz resume_kernel # returning to kernel or vm86-space
ENTRY(resume_userspace)
- XEN_GET_VCPU_INFO(%esi)
XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- XEN_GET_VCPU_INFO(%esi)
XEN_BLOCK_EVENTS(%esi)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
jnz resume_vm86
movb EVENT_MASK(%esp), %al
notb %al # %al == ~saved_mask
- XEN_LOCK_VCPU_INFO_SMP(%esi)
+ XEN_GET_VCPU_INFO(%esi)
andb evtchn_upcall_mask(%esi),%al
andb $1,%al # %al == mask & ~saved_mask
jnz restore_all_enable_events # != 0 => reenable event delivery
- XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ XEN_PUT_VCPU_INFO(%esi)
RESTORE_ALL
resume_vm86:
movl %ecx, %ds
movl %ecx, %es
movl %esp,%eax # pt_regs pointer
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
call *%edi
jmp ret_from_exception
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
pushl %eax
- SAVE_ALL_NO_EVENTMASK
+ SAVE_ALL
movl EIP(%esp),%eax
cmpl $scrit,%eax
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
-11: XEN_GET_VCPU_INFO(%esi)
- movb $0, EVENT_MASK(%esp)
- push %esp
+11: push %esp
call evtchn_do_upcall
add $4,%esp
jmp ret_from_intr
ALIGN
restore_all_enable_events:
- XEN_UNBLOCK_EVENTS(%esi)
+ XEN_LOCKED_UNBLOCK_EVENTS(%esi)
scrit: /**** START OF CRITICAL REGION ****/
XEN_TEST_PENDING(%esi)
jnz 14f # process more events if necessary...
- XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ XEN_PUT_VCPU_INFO(%esi)
RESTORE_ALL
14: XEN_LOCKED_BLOCK_EVENTS(%esi)
- XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ XEN_PUT_VCPU_INFO(%esi)
jmp 11b
ecrit: /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
critical_region_fixup:
addl $critical_fixup_table-scrit,%eax
movzbl (%eax),%eax # %eax contains num bytes popped
-#ifdef CONFIG_SMP
- cmpb $0xff,%al
+ cmpb $0xff,%al # 0xff => vcpu_info critical region
jne 15f
- add $1,%al
GET_THREAD_INFO(%ebp)
- XEN_UNLOCK_VCPU_INFO_SMP(%esi)
-15:
-#endif
- mov %esp,%esi
+ XEN_PUT_VCPU_INFO(%esi) # abort vcpu_info critical region
+ xorl %eax,%eax
+15: mov %esp,%esi
add %eax,%esi # %esi points at end of src region
mov %esp,%edi
add $0x34,%edi # %edi points at end of dst region
jmp 11b
critical_fixup_table:
- .byte Ux00,Ux00,Ux00 # testb $0xff,(%esi) = XEN_TEST_PENDING
- .byte Ux00,Ux00 # jnz 14f
- XEN_UNLOCK_VCPU_INFO_SMP_fixup
+ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = XEN_TEST_PENDING
+ .byte 0xff,0xff # jnz 14f
+ XEN_PUT_VCPU_INFO_fixup
.byte 0x00 # pop %ebx
.byte 0x04 # pop %ecx
.byte 0x08 # pop %edx
.byte 0x20 # pop %es
.byte 0x24,0x24,0x24 # add $4,%esp
.byte 0x28 # iret
- .byte Ux00,Ux00,Ux00,Ux00 # movb $1,1(%esi)
- XEN_UNLOCK_VCPU_INFO_SMP_fixup
+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
+ XEN_PUT_VCPU_INFO_fixup
.byte 0x00,0x00 # jmp 11b
# Hypervisor uses this for application faults while it executes.
movl %eax, %ds
movl %eax, %es
movl %esp,%eax /* pt_regs pointer */
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
call do_page_fault
jmp ret_from_exception
tb->error_code = regs->error_code;
}
if ( TI_GET_IF(ti) )
- ed->vcpu_info->evtchn_upcall_mask = 1;
+ tb->flags |= TBF_INTERRUPT;
return 0;
xen_fault:
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->vcpu_info->evtchn_upcall_mask = 1;
+ tb->flags |= TBF_INTERRUPT;
return 0;
}
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->vcpu_info->evtchn_upcall_mask = 1;
+ tb->flags |= TBF_INTERRUPT;
ed->arch.guest_cr2 = addr;
}
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->vcpu_info->evtchn_upcall_mask = 1;
+ tb->flags |= TBF_INTERRUPT;
return 0;
gp_in_kernel:
OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
+ OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
movw %ax,TRAPBOUNCE_cs(%edx)
movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
- movl EDOMAIN_vcpu_info(%ebx),%eax
- movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
jmp test_all_events
ALIGN
movl UREGS_esp+4(%esp),%esi
FLT13: movl UREGS_ss+4(%esp),%gs
1: /* Construct a stack frame: EFLAGS, CS/EIP */
+ movb TRAPBOUNCE_flags(%edx),%cl
subl $12,%esi
movl UREGS_eip+4(%esp),%eax
FLT14: movl %eax,%gs:(%esi)
- movl UREGS_cs+4(%esp),%eax
+ movl EDOMAIN_vcpu_info(%ebx),%eax
+ pushl VCPUINFO_upcall_mask(%eax)
+ testb $TBF_INTERRUPT,%cl
+ setnz VCPUINFO_upcall_mask(%eax) # TBF_INTERRUPT -> set upcall mask
+ popl %eax
+ shll $16,%eax # Bits 16-23: saved_upcall_mask
+ movw UREGS_cs+4(%esp),%ax # Bits 0-15: CS
FLT15: movl %eax,%gs:4(%esi)
movl UREGS_eflags+4(%esp),%eax
FLT16: movl %eax,%gs:8(%esi)
- movb TRAPBOUNCE_flags(%edx),%cl
test $TBF_EXCEPTION_ERRCODE,%cl
jz 1f
subl $4,%esi # push error_code onto guest frame
u32 disp32 = 0;
u8 *eip; /* ptr to instruction start */
u8 *pb, b; /* ptr into instr. / current instr. byte */
- u32 *pseg = NULL; /* segment for memory operand (NULL=default) */
+ u16 *pseg = NULL; /* segment for memory operand (NULL=default) */
/* WARNING: We only work for ring-3 segments. */
if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->vcpu_info->evtchn_upcall_mask = 1;
+ tb->flags |= TBF_INTERRUPT;
}
return EXCRET_fault_fixed;
OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
OFFSET(UREGS_ss, struct cpu_user_regs, ss);
+ OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
call create_bounce_frame
- movq EDOMAIN_vcpu_info(%rbx),%rax
- movb $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
jmp test_all_events
#ifdef CONFIG_VMX
movq $HYPERVISOR_VIRT_END+60,%rax
cmpq %rax,%rsi
jb domain_crash_synchronous # Above Xen private area? Then okay.
-1: subq $40,%rsi
+1: movb TRAPBOUNCE_flags(%rdx),%cl
+ subq $40,%rsi
movq UREGS_ss+8(%rsp),%rax
FLT2: movq %rax,32(%rsi) # SS
movq UREGS_rsp+8(%rsp),%rax
FLT3: movq %rax,24(%rsi) # RSP
movq UREGS_eflags+8(%rsp),%rax
FLT4: movq %rax,16(%rsi) # RFLAGS
- movq UREGS_cs+8(%rsp),%rax
-FLT5: movq %rax,8(%rsi) # CS
+ movq EDOMAIN_vcpu_info(%rbx),%rax
+ pushq VCPUINFO_upcall_mask(%rax)
+ testb $TBF_INTERRUPT,%cl
+ setnz VCPUINFO_upcall_mask(%rax) # TBF_INTERRUPT -> set upcall mask
+ popq %rax
+ shll $16,%eax # Bits 16-23: saved_upcall_mask
+ movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
+FLT5: movq %rax,8(%rsi) # CS/saved_upcall_mask
movq UREGS_rip+8(%rsp),%rax
FLT6: movq %rax,(%rsi) # RIP
- movb TRAPBOUNCE_flags(%rdx),%cl
testb $TBF_EXCEPTION_ERRCODE,%cl
jz 1f
subq $8,%rsi
void show_registers(struct cpu_user_regs *regs)
{
- printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n",
+ printk("CPU: %d\nEIP: %04x:[<%016lx>] \nEFLAGS: %016lx\n",
smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
printk("rax: %016lx rbx: %016lx rcx: %016lx rdx: %016lx\n",
regs->rax, regs->rbx, regs->rcx, regs->rdx);
u16 error_code; /* private */
u16 entry_vector; /* private */
u32 eip;
- u32 cs;
+ u16 cs;
+ u8 saved_upcall_mask;
+ u8 _pad0;
u32 eflags;
u32 esp;
- u32 ss;
- u32 es;
- u32 ds;
- u32 fs;
- u32 gs;
+ u16 ss, _pad1;
+ u16 es, _pad2;
+ u16 ds, _pad3;
+ u16 fs, _pad4;
+ u16 gs, _pad5;
} cpu_user_regs_t;
typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
u64 r14;
u64 r13;
u64 r12;
- union { u64 rbp, ebp; } PACKED;
- union { u64 rbx, ebx; } PACKED;
+ union { u64 rbp, ebp; };
+ union { u64 rbx, ebx; };
u64 r11;
u64 r10;
u64 r9;
u64 r8;
- union { u64 rax, eax; } PACKED;
- union { u64 rcx, ecx; } PACKED;
- union { u64 rdx, edx; } PACKED;
- union { u64 rsi, esi; } PACKED;
- union { u64 rdi, edi; } PACKED;
+ union { u64 rax, eax; };
+ union { u64 rcx, ecx; };
+ union { u64 rdx, edx; };
+ union { u64 rsi, esi; };
+ union { u64 rdi, edi; };
u32 error_code; /* private */
u32 entry_vector; /* private */
- union { u64 rip, eip; } PACKED;
- u64 cs;
- union { u64 rflags, eflags; } PACKED;
- union { u64 rsp, esp; } PACKED;
- u64 ss;
- u64 es;
- u64 ds;
- u64 fs; /* Non-zero => takes precedence over fs_base. */
- u64 gs; /* Non-zero => takes precedence over gs_base_user. */
+ union { u64 rip, eip; };
+ u16 cs;
+ u8 saved_upcall_mask;
+ u8 _pad0[5];
+ union { u64 rflags, eflags; };
+ union { u64 rsp, esp; };
+ u16 ss, _pad1[3];
+ u16 es, _pad2[3];
+ u16 ds, _pad3[3];
+ u16 fs, _pad4[3]; /* Non-zero => takes precedence over fs_base. */
+ u16 gs, _pad5[3]; /* Non-zero => takes precedence over gs_base_user. */
} cpu_user_regs_t;
typedef u64 tsc_timestamp_t; /* RDTSC timestamp */